import numpy as np
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import pandas as pd
from tensorflow import keras
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MinMaxScaler
from matplotlib import pyplot
import plotly.graph_objects as go
import math
import seaborn as sns
from sklearn.metrics import mean_squared_error
np.random.seed(1)
tf.random.set_seed(1)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, GRU, Dropout, RepeatVector, TimeDistributed
from keras import backend
# --- Experiment configuration ----------------------------------------------
MODELFILENAME = 'MODELS/LSTM_12h_TFM_2c'  # path stem used when saving the trained model
TIME_STEPS = 72          # sliding-window length: 72 samples = 12 h
CMODEL = LSTM            # recurrent cell class used for both layers
UNITS = 45               # hidden units per recurrent layer
DROPOUT1 = 0.118         # dropout rate after the first recurrent layer
DROPOUT2 = 0.243         # dropout rate after the second recurrent layer
ACTIVATION = 'tanh'      # activation of the TimeDistributed output head
OPTIMIZER = 'adamax'     # keras optimizer name
EPOCHS = 43              # maximum training epochs (early stopping may end sooner)
BATCHSIZE = 30           # mini-batch size
VALIDATIONSPLIT = 0.2    # fraction of training data held out for validation
# Code to read csv file into Colaboratory:
# from google.colab import files
# uploaded = files.upload()
# import io
# df = pd.read_csv(io.BytesIO(uploaded['SentDATA.csv']))
# Dataset is now stored in a Pandas Dataframe
df = pd.read_csv('../../data/dadesTFM.csv')
df.reset_index(inplace=True)
# Parse timestamps and use them as the DataFrame index.
df['Time'] = pd.to_datetime(df['Time'])
df = df.set_index('Time')
# Canonical (space-free) column names used throughout the rest of the script.
columns = ['PM1','PM25','PM10','PM1ATM','PM25ATM','PM10ATM']
df1 = df.copy()
df1 = df1.rename(columns={"PM 1":"PM1","PM 2.5":"PM25","PM 10":"PM10","PM 1 ATM":"PM1ATM","PM 2.5 ATM":"PM25ATM","PM 10 ATM":"PM10ATM"})
# Cast every PM column to float32 (keras works with float32 inputs); the loop
# replaces six near-identical assignment lines in the original.
for _src, _dst in [("PM 1", "PM1"), ("PM 2.5", "PM25"), ("PM 10", "PM10"),
                   ("PM 1 ATM", "PM1ATM"), ("PM 2.5 ATM", "PM25ATM"),
                   ("PM 10 ATM", "PM10ATM")]:
    df1[_dst] = df[_src].astype(np.float32)
df2 = df1.copy()
# Chronological 80/20 train/test split (no shuffling: this is time-series data).
train_size = int(len(df2) * 0.8)
test_size = len(df2) - train_size
train, test = df2.iloc[0:train_size], df2.iloc[train_size:len(df2)]
# Recorded notebook output: train.shape, test.shape -> ((3117, 7), (780, 7))
# Standardize the data (z-score per column, fitted on the training split).
# Work on an explicit copy so the column assignments below do not trigger
# pandas' SettingWithCopyWarning (train is a slice of df2) — the original
# notebook emitted that warning six times here.
train = train.copy()
for col in columns:
    # NOTE(review): a fresh StandardScaler is created per column, so only the
    # LAST column's scaler survives the loop; the fitted scalers are not kept
    # for inverse-transforming the training data later — confirm intended.
    scaler = StandardScaler()
    train[col] = scaler.fit_transform(train[[col]])
def create_sequences(X, y, time_steps=None):
    """Slice ``X``/``y`` into overlapping sliding windows for sequence models.

    Parameters
    ----------
    X : pandas.DataFrame
        Feature frame; each window is ``X.iloc[i:i+time_steps].values``.
    y : pandas.Series
        Target series; the label for window ``i`` is ``y.iloc[i+time_steps]``.
    time_steps : int, optional
        Window length. Defaults to the module-level ``TIME_STEPS``; resolved
        lazily at call time (the original evaluated it once at definition
        time, freezing whatever value ``TIME_STEPS`` had then).

    Returns
    -------
    (np.ndarray, np.ndarray)
        Arrays of shape ``(samples, time_steps, n_features)`` and ``(samples,)``.
    """
    if time_steps is None:
        time_steps = TIME_STEPS
    Xs, ys = [], []
    for i in range(len(X) - time_steps):
        Xs.append(X.iloc[i:(i + time_steps)].values)
        ys.append(y.iloc[i + time_steps])
    return np.array(Xs), np.array(ys)
# Build training windows from the (standardized) PM2.5 column only.
X_train, y_train = create_sequences(train[[columns[1]]], train[columns[1]])
#X_test, y_test = create_sequences(test[[columns[1]]], test[columns[1]])
print(f'X_train shape: {X_train.shape}')
print(f'y_train shape: {y_train.shape}')
# Recorded notebook output:
#   X_train shape: (3045, 72, 1)
#   y_train shape: (3045,)
# Add an extra metric: RMSE (not available as a built-in keras metric name).
def rmse(y_true, y_pred):
    """Root-mean-squared error computed with Keras backend ops."""
    squared_diff = backend.square(y_true - y_pred)
    return backend.sqrt(backend.mean(squared_diff, axis=-1))
model = Sequential()
# First recurrent layer returns the full sequence so it can feed the second one.
model.add(CMODEL(units=UNITS, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(Dropout(rate=DROPOUT1))
# Second recurrent layer also returns sequences so the TimeDistributed head
# emits one value per time step (reconstruction-style output, (None, 72, 1)).
model.add(CMODEL(units=UNITS, return_sequences=True))
model.add(Dropout(rate=DROPOUT2))
model.add(TimeDistributed(Dense(1, kernel_initializer='normal', activation=ACTIVATION)))
# MAE loss; MSE and the custom rmse function are tracked as extra metrics.
model.compile(optimizer=OPTIMIZER, loss='mae', metrics=['mse', rmse])
model.summary()
# Recorded notebook output (model summary) — was pasted inline as raw text,
# which is not valid Python; kept here as a comment:
#   lstm (LSTM)                  (None, 72, 45)   8460
#   dropout (Dropout)            (None, 72, 45)   0
#   lstm_1 (LSTM)                (None, 72, 45)   16380
#   dropout_1 (Dropout)          (None, 72, 45)   0
#   time_distributed (Dense)     (None, 72, 1)    46
#   Total params: 24,886 (all trainable)
# Train with early stopping on validation loss; shuffle=False preserves the
# temporal order of the windows (time-series data).
history = model.fit(
    X_train, y_train,
    epochs=EPOCHS,
    batch_size=BATCHSIZE,
    validation_split=VALIDATIONSPLIT,
    callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, mode='min')],
    shuffle=False)
# Recorded notebook output: the run completed all 43 epochs (early stopping
# never fired). Final epoch (43/43):
#   loss 0.5600, mse 0.6126, rmse 0.6219,
#   val_loss 0.3387, val_mse 0.3240, val_rmse 0.3896
import matplotlib.pyplot as plt
# Plot every tracked metric curve (training and validation) on one figure.
curves = [
    ('loss', 'MAE Training loss'),
    ('val_loss', 'MAE Validation loss'),
    ('mse', 'MSE Training loss'),
    ('val_mse', 'MSE Validation loss'),
    ('rmse', 'RMSE Training loss'),
    ('val_rmse', 'RMSE Validation loss'),
]
for metric_key, metric_label in curves:
    plt.plot(history.history[metric_key], label=metric_label)
plt.legend()
# Per-window MAE between the model reconstruction and the training windows.
X_train_pred = model.predict(X_train, verbose=0)
train_mae_loss = np.mean(np.abs(X_train_pred - X_train), axis=1)
plt.hist(train_mae_loss, bins=50)
plt.xlabel('Train MAE loss')
plt.ylabel('Number of Samples')
def evaluate_prediction(predictions, actual, model_name):
    """Report prediction quality.

    Prints MAE, RMSE and MSE of ``predictions`` against ``actual`` under a
    ``model_name`` header and returns them as ``(mae, rmse, mse)``.
    """
    errors = predictions - actual
    mse = np.square(errors).mean()
    rmse = np.sqrt(mse)
    mae = np.abs(errors).mean()
    print(f'{model_name}:')
    print(f'Mean Absolute Error: {mae:.4f}')
    print(f'Root Mean Square Error: {rmse:.4f}')
    print(f'Mean Square Error: {mse:.4f}')
    print('')
    return mae, rmse, mse
# Evaluate the reconstruction quality on the training windows.
# NOTE: the original unpacked into ``mae, rmse, mse``, which rebound the name
# ``rmse`` from the metric *function* defined above to a float; distinct names
# avoid that shadowing.
train_mae, train_rmse, train_mse = evaluate_prediction(X_train_pred, X_train, "LSTM")
# Recorded notebook output:
#   LSTM: MAE 0.2726, RMSE 0.4941, MSE 0.2441
model.save(MODELFILENAME + '.h5')
# Compute the anomaly threshold from the test reconstruction error.
def calculate_threshold(X_test, X_test_pred):
    """Return the anomaly threshold: the per-row RMSE at the 90% cutoff.

    Computes the RMSE between ``X_test_pred`` and ``X_test`` along axis 1,
    sorts the distances ascending and picks the value at index
    ``int(0.9 * len(distance))`` as the threshold.  (A stray string in the
    original claimed a 0.80 cutoff, but the code has always used 0.9.)
    """
    distance = np.sqrt(np.mean(np.square(X_test_pred - X_test), axis=1))
    distance.sort()
    cut_off = int(0.9 * len(distance))
    threshold = distance[cut_off]
    return threshold
# Per-column evaluation on the test split: standardize, window, score against
# the trained model, derive a threshold and plot detected anomalies.
for col in columns:
    print("####################### " + col + " ###########################")
    # Standardize the test data.
    # NOTE(review): the scaler is fitted on the *test* column itself rather
    # than reusing the training scaler — this leaks test statistics and makes
    # train/test scales inconsistent; confirm this is intended.
    scaler = StandardScaler()
    test_cpy = test.copy()  # kept from the original; this copy is never read
    test[col] = scaler.fit_transform(test[[col]])
    # Build windowed sequences from the (now standardized) test column.
    X_test1, y_test1 = create_sequences(test[[col]], test[col])
    print(f'Testing shape: {X_test1.shape}')
    # Evaluate the model (compiled loss/mse/rmse metrics).
    # ``eval_results`` instead of the original ``eval``, which shadowed the builtin.
    eval_results = model.evaluate(X_test1, y_test1)
    print("evaluate: ", eval_results)
    # Predict (reconstruct) the test windows.
    X_test1_pred = model.predict(X_test1, verbose=0)
    evaluate_prediction(X_test1_pred, X_test1, "LSTM")
    # Per-window MAE / RMSE reconstruction losses.
    test1_mae_loss = np.mean(np.abs(X_test1_pred - X_test1), axis=1)
    test1_rmse_loss = np.sqrt(np.mean(np.square(X_test1_pred - X_test1), axis=1))
    # Flatten (samples, steps, 1) -> (samples*steps, 1) for the threshold calc.
    X_test1_predReshape = X_test1_pred.reshape((X_test1_pred.shape[0] * X_test1_pred.shape[1]), X_test1_pred.shape[2])
    X_test1Reshape = X_test1.reshape((X_test1.shape[0] * X_test1.shape[1]), X_test1.shape[2])
    threshold_test = calculate_threshold(X_test1Reshape, X_test1_predReshape)
    # Score frame: one row per window end, with loss, threshold and anomaly flag.
    test1_score_df = pd.DataFrame(test[TIME_STEPS:])
    test1_score_df['loss'] = test1_rmse_loss.reshape((-1))
    test1_score_df['threshold'] = threshold_test
    test1_score_df['anomaly'] = test1_score_df['loss'] > test1_score_df['threshold']
    test1_score_df[col] = test[TIME_STEPS:][col]
    # Plot test loss vs. threshold.
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=test1_score_df.index, y=test1_score_df['loss'], name='Test loss'))
    fig.add_trace(go.Scatter(x=test1_score_df.index, y=test1_score_df['threshold'], name='Threshold'))
    fig.update_layout(showlegend=True, title='Test loss vs. Threshold')
    fig.show()
    # Collect the rows flagged as anomalous.
    anomalies1 = test1_score_df.loc[test1_score_df['anomaly']]
    print('anomalies: ', anomalies1.shape)
    print()
    # Plot the inverse-transformed values and the detected anomalies, to verify
    # the normalization does not distort the data.
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=test1_score_df.index, y=scaler.inverse_transform(test1_score_df[col]), name=col))
    fig.add_trace(go.Scatter(x=anomalies1.index, y=scaler.inverse_transform(anomalies1[col]), mode='markers', name='Anomaly'))
    fig.update_layout(showlegend=True, title='Detected anomalies')
    fig.show()
    print("######################################################")
# Recorded notebook output (was pasted inline as raw text, not valid Python):
#   Testing shape per column: (708, 72, 1)
#   model.evaluate [loss, mse, rmse]:
#     PM1     [0.5936, 0.8489, 0.6860]   anomalies (223, 10)
#     PM25    [0.6209, 0.9110, 0.7122]   anomalies (155, 10)
#     PM10    [0.6429, 0.9566, 0.7343]   anomalies (106, 10)
#     PM1ATM  [0.6470, 0.8864, 0.7460]   anomalies (143, 10)
#     PM25ATM [0.6406, 0.8712, 0.7382]   anomalies (144, 10)
#     PM10ATM [0.6299, 0.8627, 0.7211]   anomalies (145, 10)
#   (Each iteration also emitted a SettingWithCopyWarning at the
#   ``test[col] = scaler.fit_transform(...)`` assignment.)